Starting to look at regression models for predicting pitch pine presence/abundance. I think markdown scripts are easier to keep neat and organized.
All counts are included in this script (PIRI, Shrub Oak, and Other categories)
Libraries:
# libraries -------------------
library(dplyr)
library(tidyverse)
library(ggplot2)
library(GGally)
library(glmmTMB)
library(TMB)
library(emmeans)
library(DHARMa)
library(lmtest)
library(ggeffects)
library(performance)
Set working directory
# NOTE(review): absolute, user-specific path — not portable across machines;
# consider an RStudio project root or here::here() instead
knitr::opts_knit$set(root.dir = '/Users/user/Desktop/Data/Regen_RProj/')
Functions:
# functions -------------------
Import small seedling data:
# global variables -------------------
source("Scripts/SS_Import.R")
Pivot wider to create dataframe where each row is for one plot and has total details for each species group
# Reshape to one row per plot, with one total-count column per species group
ss_merge2 <- ss_merge %>%
  select(Plot_No, Region, Treat_Type, Site, Species_Groups, Total) %>% #this is dropping stump sprout, browse, and germinate data
  pivot_wider(names_from = Species_Groups, values_from = Total)
Import time since data and add it to the small seedling dataset
# Treatment-year lookup table, joined to the plot data by Site
time_since <- read_csv("CleanData/Treat_Year_Data.csv")
ss_merge3 <- merge(ss_merge2, time_since, by = "Site")
#log transform time from treatment data (used later as the model offset l.TFT)
ss_merge3$l.TFT <- log(ss_merge3$Time_from_Treat)
Run the ‘Add_BA’ script and merge with dataset:
source("Scripts/Add_BA.R")
# merge with ss dataset -------------------
ss_merge4 <- merge(ss_merge3, prism_BA, by = "Plot_No")  # adds prism basal-area variables per plot
Run ‘Ground_Data.R’ script and add it to small seedling dataset:
source("Scripts/Ground_Data.R")
# merge with ss dataset -------------------
ss_merge5 <- merge(ss_merge4, ground3, by = "Plot_No")  # adds ground-cover variables per plot
# drop intermediate frames no longer needed
rm(ss_merge2,
ss_merge3,
ss_merge4)
Source and add veg data
source("Scripts/Veg_Data.R")
# merge with ss dataset — ss.all is the full plot-level dataset
ss.all <- merge(ss_merge5, veg3, by = "Plot_No")
Create log transformed categories for newly added variables, then select for just the desired variables:
# log(x + 1) transforms for the three seedling-count groups (counts contain zeros),
# then keep only the analysis variables, ordered by treatment type
ss.all <- ss.all %>%
  mutate(l.PIRI = log(PIRI + 1),
         l.SO = log(Shrub_Oak + 1),
         l.other = log(Other + 1))
ss.all2 <- ss.all %>%
  select(Treat_Type, Region, Site, Plot_No, PIRI, l.PIRI, Shrub_Oak, l.SO, Other, l.other, Time_from_Treat, l.TFT, BA_HA, l.BA_HA, PIRI.BA_HA, l.BA_piri, Mineral_Soil, l.Mineral, Litter_Duff, avgLD, avgLD_l, Veg_Total, l.Veg_Total) %>%
  arrange(Treat_Type)
Select just for numerical vs log and then look at paired plots:
#not transformed: raw counts and covariates
ss.num <- ss.all2 %>%
select(PIRI, Shrub_Oak, Other, Time_from_Treat, BA_HA, PIRI.BA_HA, Mineral_Soil, avgLD, Veg_Total, Treat_Type)
ggpairs(ss.num, aes(color = Treat_Type))  # pairwise scatter/correlation matrix, colored by treatment
ggpairs(ss.num)
#log transformed versions of the same variables
ss.numl <- ss.all2 %>%
select(l.PIRI, l.SO, l.other, l.TFT, l.BA_HA, l.BA_piri, l.Mineral, avgLD_l, l.Veg_Total, Treat_Type)
ggpairs(ss.numl, aes(color = Treat_Type))
ggpairs(ss.numl)
# drop the exploratory subsets
rm(ss.num,
ss.numl)
Can see the correlation coefficients for linear (Pearsons) relationships. None of them appear very strong, except for ones that are analogs (avg LD vs mineral soil exposure; ba/ha vs piri ba/ha)
Full dataset is called ss.all2.
tapply(ss.all2$PIRI, ss.all2$Region, summary)
## $ALB
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 0.418 0.000 15.000
##
## $LI
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.6164 1.0000 5.0000
##
## $MA
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 1.221 0.000 59.000
##
## $ME
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.2364 0.0000 6.0000
##
## $NH
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 0.383 0.000 9.000
tapply(ss.all2$PIRI, ss.all2$Treat_Type, summary)
## $Control
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000000 0.000000 0.000000 0.008547 0.000000 1.000000
##
## $FallRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 1.294 0.750 34.000
##
## $Harvest
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.5714 1.0000 4.0000
##
## $MowRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.8677 0.0000 59.0000
##
## $SpringRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.5747 0.0000 6.0000
# PIRI exist in every region and TT
Model step wise elimination with treatment type included
# Full Poisson GLMM: PIRI seedling count ~ treatment + all candidate covariates.
# offset(l.TFT) adjusts counts for (log) time since treatment;
# random intercepts for Plot nested within Site.
ss.m1 <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m1) #724.86
## [1] 724.869
#would like to test ba vs ba piri
ss.m2a <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m2a) #723.6
## [1] 723.6238
lrtest(ss.m1, ss.m2a) # p = .4
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.Mineral + avgLD_l +
## l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 14 -348.43
## 2 13 -348.81 -1 0.7548 0.385
ss.m2b <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m2b) #725.9
## [1] 725.932
lrtest(ss.m1, ss.m2b) # p =.08, so not important
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + l.other + l.BA_piri + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 14 -348.43
## 2 13 -349.97 -1 3.0629 0.0801 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
rm(ss.m2a, ss.m2b)
# test ld vs mineral exposure
ss.m3a <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m3a) #723.0
## [1] 723.0109
lrtest(ss.m1, ss.m3a) #p=.25
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + l.other + avgLD_l + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 14 -348.43
## 2 11 -350.51 -3 4.1419 0.2465
ss.m3b <- glmmTMB(PIRI ~ Treat_Type + l.SO + l.other + l.Mineral + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m3b) #730.1
## [1] 730.1389
lrtest(ss.m1, ss.m3b) # p = 0.01, loss of avg ld significant
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + l.other + l.Mineral + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 14 -348.43
## 2 11 -354.07 -3 11.27 0.01035 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
rm(ss.m1, ss.m3b)
# test l.other
ss.m4 <- glmmTMB(PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m4) #722.6
## [1] 722.6016
lrtest(ss.m3a, ss.m4) #p=.2, can drop other
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + l.other + avgLD_l + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 11 -350.51
## 2 10 -351.30 -1 1.5908 0.2072
rm(ss.m3a)
# test l.SO
ss.m5 <- glmmTMB(PIRI ~ Treat_Type + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m5) #725.5
## [1] 725.4879
lrtest(ss.m4, ss.m5) #p = 0.03, l.so important
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + avgLD_l + l.Veg_Total + offset(l.TFT) + (1 |
## Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 10 -351.30
## 2 9 -353.74 -1 4.8863 0.02707 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
rm(ss.m5)
#test veg
ss.m6 <- glmmTMB(PIRI ~ Treat_Type + l.SO + avgLD_l + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
AIC(ss.m6) #727
## [1] 726.9952
lrtest(ss.m4, ss.m6) # p = 0.011, veg important
## Likelihood ratio test
##
## Model 1: PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: PIRI ~ Treat_Type + l.SO + avgLD_l + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 10 -351.3
## 2 9 -354.5 -1 6.3935 0.01145 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
rm(ss.m6)
Do pairwise plots on the variables of interest from the reduced model
ss.pair <- ss.all2 %>%
select(Region, Treat_Type, l.PIRI, l.SO, l.Veg_Total, avgLD_l, l.TFT)
ggpairs(ss.pair, aes(color = Treat_Type))
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
## `stat_bin()` using `bins = 30`. Pick better value with `binwidth`.
rm(ss.pair)
Not seeing any clear, strong correlations
Best model:
# Best (reduced) model refit: treatment type, shrub oak count, litter depth,
# and veg cover, with the time offset and Site/Plot random intercepts
ss.m4 <- glmmTMB(PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
family = poisson)
summary(ss.m4) #722.6
## Family: poisson ( log )
## Formula:
## PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Data: ss.all2
##
## AIC BIC logLik deviance df.resid
## 722.6 764.7 -351.3 702.6 488
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Plot_No:Site (Intercept) 4.656 2.158
## Site (Intercept) 2.247 1.499
## Number of obs: 498, groups: Plot_No:Site, 498; Site, 47
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -5.2343 1.7869 -2.929 0.00340 **
## Treat_TypeFallRx 6.8805 1.4362 4.791 0.00000166 ***
## Treat_TypeHarvest 6.4779 1.5011 4.315 0.00001594 ***
## Treat_TypeMowRx 5.2750 1.4183 3.719 0.00020 ***
## Treat_TypeSpringRx 6.7833 1.4406 4.709 0.00000250 ***
## l.SO -0.5173 0.2390 -2.164 0.03045 *
## avgLD_l -1.1968 0.4340 -2.757 0.00583 **
## l.Veg_Total -0.8030 0.3279 -2.449 0.01431 *
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
check_collinearity(ss.m4) # all with low collinearity
## # Check for Multicollinearity
##
## Low Correlation
##
## Term VIF VIF 95% CI Increased SE Tolerance Tolerance 95% CI
## Treat_Type 1.16 [1.08, 1.32] 1.08 0.86 [0.76, 0.93]
## l.SO 1.04 [1.00, 1.40] 1.02 0.96 [0.71, 1.00]
## avgLD_l 1.10 [1.04, 1.28] 1.05 0.91 [0.78, 0.97]
## l.Veg_Total 1.04 [1.00, 1.48] 1.02 0.97 [0.67, 1.00]
# #checking nbinom2
# ss.m4a <- glmmTMB(PIRI ~ Treat_Type + l.SO + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
# data = ss.all2,
# family = nbinom2)
# AIC(ss.m4a) #higher with nbinom 1 & 2
Checking model fit:
ss.m4_sr <- simulateResiduals(ss.m4, n = 1000, plot = TRUE) #looks pretty good
testResiduals(ss.m4_sr) #passes uniformity, dispersion, and outliers
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.03794, p-value = 0.4704
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.0030764, p-value = 0.22
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 0.98
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.006024096
## sample estimates:
## outlier frequency (expected: 0.00140562248995984 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.03794, p-value = 0.4704
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.0030764, p-value = 0.22
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 0.98
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.006024096
## sample estimates:
## outlier frequency (expected: 0.00140562248995984 )
## 0
testQuantiles(ss.m4_sr) #passes
##
## Test for location of quantiles via qgam
##
## data: simulationOutput
## p-value = 0.6634
## alternative hypothesis: both
testZeroInflation(ss.m4_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 0.99577, p-value = 0.882
## alternative hypothesis: two.sided
Time to pairwise test
emmeans(ss.m4, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response')
## $emmeans
## Treat_Type rate SE df asymp.LCL asymp.UCL
## Control 0.000147 0.000196 Inf 0.0000108 0.002
## FallRx 0.143080 0.098068 Inf 0.0373384 0.548
## Harvest 0.095658 0.081203 Inf 0.0181195 0.505
## MowRx 0.028729 0.019962 Inf 0.0073602 0.112
## SpringRx 0.129823 0.093717 Inf 0.0315418 0.534
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log scale
##
## $contrasts
## contrast ratio SE df null z.ratio p.value
## Control / FallRx 0.00103 0.00148 Inf 1 -4.791 <.0001
## Control / Harvest 0.00154 0.00231 Inf 1 -4.315 0.0002
## Control / MowRx 0.00512 0.00726 Inf 1 -3.719 0.0019
## Control / SpringRx 0.00113 0.00163 Inf 1 -4.709 <.0001
## FallRx / Harvest 1.49575 1.48408 Inf 1 0.406 0.9943
## FallRx / MowRx 4.98025 4.23364 Inf 1 1.889 0.3234
## FallRx / SpringRx 1.10211 0.97169 Inf 1 0.110 1.0000
## Harvest / MowRx 3.32961 3.28774 Inf 1 1.218 0.7408
## Harvest / SpringRx 0.73683 0.73931 Inf 1 -0.304 0.9981
## MowRx / SpringRx 0.22130 0.18901 Inf 1 -1.766 0.3936
##
## P value adjustment: tukey method for comparing a family of 5 estimates
## Tests are performed on the log scale
FallRx, Harvest, MowRx, SpringRx all significantly different from Control - no treatment significantly different from another
Model ss.m4 uses Treatment Type, Scrub Oak presence, average leaf litter depth, and total veg cover as predictors. Make a graph for each
ss.p1 <- ggpredict(ss.m4, terms = c("avgLD_l", "Treat_Type"))
ggplot(ss.p1, aes(x, predicted, color = group))+
geom_line(linewidth = 1.25, show.legend = FALSE) +
scale_color_manual(values = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15"))+
theme_classic()+
theme(panel.background = element_blank()) +
theme(axis.text = element_text(size = 18), axis.title = element_text(size = 24))+
labs(x = "Average leaf litter depth \n (log transformed)",
y = NULL)
#y = "Pitch pine stems \n (adjusted for time from treatment)")
ggsave(filename = "SS_PIRI_avgld.tiff", path = "Plots/PIRI_GLMM", width = 6, height = 4.2, device = "tiff", dpi = 700)
# PIRI SS plot 2 -------------------
ss.p2 <- ggpredict(ss.m4, terms = c("l.Veg_Total", "Treat_Type"))
ggplot(ss.p2, aes(x, predicted, color = group))+
geom_line(linewidth = 1.25, show.legend = FALSE) +
scale_color_manual(values = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15"))+
theme_classic()+
theme(panel.background = element_blank()) +
theme(axis.text = element_text(size = 18), axis.title = element_text(size = 24))+
labs(x = "Average understory vegetation \n cover (log transformed)",
y = "Pitch pine stems \n (adjusted for time)")
ggsave(filename = "SS_PIRI_veg.tiff", path = "Plots/PIRI_GLMM", width = 6, height = 4.2, device = "tiff", dpi = 700)
# PIRI SS plot 3 -------------------
ss.p3 <- ggpredict(ss.m4, terms = c("l.SO", "Treat_Type"))
ggplot(ss.p3, aes(x, predicted, color = group))+
geom_line(linewidth = 1.25, show.legend = F) +
scale_color_manual(values = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15"))+
theme_classic()+
theme(panel.background = element_blank()) +
theme(axis.text = element_text(size = 18), axis.title = element_text(size = 24)) +
#legend.position = "right", legend.text = element_text(size = 18), legend.title = element_text(size = 22))+
labs(x = "Shrub oak seedling counts \n (log transformed)",
y = NULL, #"Pitch pine stems \n (adjusted for time)",
color = "Treatment Type")
ggsave(filename = "SS_PIRI_SO.tiff", path = "Plots/PIRI_GLMM", width = 6, height = 4.2, device = "tiff", dpi = 700)
tapply(ss.all2$Shrub_Oak, ss.all2$Region, summary)
## $ALB
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00 0.00 0.00 2.18 3.00 18.00
##
## $LI
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 1.055 0.000 10.000
##
## $MA
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 2.357 3.000 21.000
##
## $ME
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0 0.0 2.0 4.8 5.0 39.0
##
## $NH
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00 0.00 1.00 2.84 4.00 19.00
tapply(ss.all2$Shrub_Oak, ss.all2$Treat_Type, summary)
## $Control
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 2.393 3.000 39.000
##
## $FallRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 1.500 3.735 5.000 24.000
##
## $Harvest
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 0.875 0.000 9.000
##
## $MowRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 1.000 2.838 4.000 21.000
##
## $SpringRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000 0.000 0.000 1.621 2.000 18.000
# SO exist in every region and TT
Begin model elimination — this was done previously with Poisson, which had distribution issues. nbinom1 was better, and the best results used zero inflation by region (zi ~ Region).
# Full shrub oak model: nbinom1 (linear mean-variance) with region-specific
# zero inflation; same time offset and nested random intercepts as the PIRI models
so.ss1a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss1a) #1850.1
## [1] 1850.113
#test piri BA
so.ss2a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss2a) #1848.1
## [1] 1848.141
# test total ba
so.ss3a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss3a) #1846.2
## [1] 1846.182
# test mineral
so.ss4a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.PIRI + l.other + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss4a) #1844.3
## [1] 1844.342
# test litter depth
so.ss5a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss5a) #1842.5
## [1] 1842.527
lrtest(so.ss5a, so.ss4a, so.ss3a, so.ss2a, so.ss1a)
## Likelihood ratio test
##
## Model 1: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + avgLD_l + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## Model 3: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Mineral + avgLD_l +
## l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 4: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 5: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.BA_HA + l.BA_piri +
## l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1 |
## Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 16 -905.26
## 2 17 -905.17 1 0.1848 0.6673
## 3 18 -905.09 1 0.1602 0.6889
## 4 19 -905.07 1 0.0407 0.8400
## 5 20 -905.06 1 0.0280 0.8671
# test other seedling
so.ss6a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.PIRI + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss6a) #1843
## [1] 1843.006
lrtest(so.ss5a, so.ss6a) # p = .11
## Likelihood ratio test
##
## Model 1: Shrub_Oak ~ Treat_Type + l.PIRI + l.other + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: Shrub_Oak ~ Treat_Type + l.PIRI + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 16 -905.26
## 2 15 -906.50 -1 2.4795 0.1153
# test PIRI seedling
so.ss7a <- glmmTMB(Shrub_Oak ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss7a) #1843.8
## [1] 1843.878
lrtest(so.ss6a, so.ss7a) # p = 0.09
## Likelihood ratio test
##
## Model 1: Shrub_Oak ~ Treat_Type + l.PIRI + l.Veg_Total + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: Shrub_Oak ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 15 -906.50
## 2 14 -907.94 -1 2.8718 0.09014 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
# test veg cover
so.ss8a <- glmmTMB(Shrub_Oak ~ Treat_Type + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss8a) #1845.2
## [1] 1845.213
lrtest(so.ss7a, so.ss8a) # p = 0.06
## Likelihood ratio test
##
## Model 1: Shrub_Oak ~ Treat_Type + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: Shrub_Oak ~ Treat_Type + offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 14 -907.94
## 2 13 -909.61 -1 3.3354 0.06781 .
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
rm(so.ss1a,
so.ss2a,
so.ss3a,
so.ss4a,
so.ss5a,
so.ss6a,
so.ss7a)
Tested residuals on so.ss7a and so.ss6a as well — both were worse
# seemingly best model: treatment type only, with time offset and
# region-specific zero inflation (residual checks were best for this form)
so.ss8a <- glmmTMB(Shrub_Oak ~ Treat_Type + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all2,
ziformula = ~Region,
family = nbinom1)
AIC(so.ss8a) #1845.2
## [1] 1845.213
summary(so.ss8a)
## Family: nbinom1 ( log )
## Formula: Shrub_Oak ~ Treat_Type + offset(l.TFT) + (1 | Site/Plot_No)
## Zero inflation: ~Region
## Data: ss.all2
##
## AIC BIC logLik deviance df.resid
## 1845.2 1900.0 -909.6 1819.2 485
##
## Random effects:
##
## Conditional model:
## Groups Name Variance Std.Dev.
## Plot_No:Site (Intercept) 0.1959 0.4426
## Site (Intercept) 1.1781 1.0854
## Number of obs: 498, groups: Plot_No:Site, 498; Site, 47
##
## Dispersion parameter for nbinom1 family (): 2.66
##
## Conditional model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -3.1156 0.4019 -7.753 0.00000000000000898 ***
## Treat_TypeFallRx 2.5356 0.5532 4.583 0.00000457282661421 ***
## Treat_TypeHarvest 1.5420 0.6928 2.226 0.026 *
## Treat_TypeMowRx 2.8265 0.5191 5.445 0.00000005188937651 ***
## Treat_TypeSpringRx 2.8205 0.5672 4.972 0.00000066162708330 ***
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
##
## Zero-inflation model:
## Estimate Std. Error z value Pr(>|z|)
## (Intercept) -1.1038 0.4144 -2.664 0.00773 **
## RegionLI 1.0358 0.7268 1.425 0.15414
## RegionMA 0.2202 0.4805 0.458 0.64670
## RegionME -1.3485 0.6592 -2.046 0.04080 *
## RegionNH -16.8256 3461.9033 -0.005 0.99612
## ---
## Signif. codes: 0 '***' 0.001 '**' 0.01 '*' 0.05 '.' 0.1 ' ' 1
so.ss8a_sr <- simulateResiduals(so.ss8a, n = 1000, plot = TRUE)
testResiduals(so.ss8a_sr) #passes
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.028938, p-value = 0.7985
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.23625, p-value = 0.092
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.00000000 0.01109438
## sample estimates:
## outlier frequency (expected: 0.00184738955823293 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.028938, p-value = 0.7985
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.23625, p-value = 0.092
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.00000000 0.01109438
## sample estimates:
## outlier frequency (expected: 0.00184738955823293 )
## 0
testQuantiles(so.ss8a_sr) # p = 0.000006; this is the best so far
##
## Test for location of quantiles via qgam
##
## data: simulationOutput
## p-value = 0.000006085
## alternative hypothesis: both
emmeans(so.ss8a, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response')
## $emmeans
## Treat_Type response SE df asymp.LCL asymp.UCL
## Control 0.269 0.108 Inf 0.123 0.592
## FallRx 3.401 1.353 Inf 1.560 7.415
## Harvest 1.259 0.769 Inf 0.380 4.171
## MowRx 4.549 1.572 Inf 2.312 8.954
## SpringRx 4.522 1.946 Inf 1.946 10.509
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log scale
##
## $contrasts
## contrast ratio SE df null z.ratio p.value
## Control / FallRx 0.0792 0.0438 Inf 1 -4.583 <.0001
## Control / Harvest 0.2139 0.1482 Inf 1 -2.226 0.1701
## Control / MowRx 0.0592 0.0307 Inf 1 -5.445 <.0001
## Control / SpringRx 0.0596 0.0338 Inf 1 -4.972 <.0001
## FallRx / Harvest 2.7009 1.9435 Inf 1 1.381 0.6401
## FallRx / MowRx 0.7476 0.3845 Inf 1 -0.566 0.9800
## FallRx / SpringRx 0.7521 0.4323 Inf 1 -0.496 0.9878
## Harvest / MowRx 0.2768 0.1925 Inf 1 -1.847 0.3464
## Harvest / SpringRx 0.2785 0.2008 Inf 1 -1.773 0.3894
## MowRx / SpringRx 1.0060 0.5436 Inf 1 0.011 1.0000
##
## P value adjustment: tukey method for comparing a family of 5 estimates
## Tests are performed on the log scale
FallRx, MowRx, and SpringRx significantly different than control; none of the treatments significantly different from each other
Need to figure out better use of emmeans (potentially)
# Estimated marginal means with Tukey-adjusted pairwise comparisons,
# back-transformed to the response (count) scale
so.ss1.plot <- emmeans(so.ss8a, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response')
# renamed from `plot` to avoid shadowing base::plot()
emm_plot <- plot(so.ss1.plot, horizontal=FALSE, comparisons=TRUE, color = c("#D8B70A", "#02401B", "#A2A475", "#81A88D", "#972D15")) + theme_bw()
emm_plot+
theme_classic()+
theme(panel.background = element_blank()) +
theme(axis.text = element_text(size = 14), axis.title = element_text(size = 20), legend.position = "right", legend.text = element_text(size = 14), legend.title = element_text(size = 18)) +
labs(x = "Shrub Oak Stems \n (corrected for time from treatment)",
y = "Treatment Type", # BUG FIX: was `z =` — not a ggplot label name, so the axis label was silently dropped
color = "Treatment Type")
# Germinant counts: pull PIRI germinants from the raw seedling data
ss_germ <- merge(ss_merge, time_since, by = "Site")
germ_piri <- ss_germ %>%
filter(Species_Groups == "PIRI") %>%
rename(PIRI_Germ = Germinate) %>%
select(Plot_No, PIRI_Germ)
ss.all3 <- merge(ss.all2, germ_piri, by = "Plot_No")  # inner join: keeps only plots present in both
tapply(ss.all3$PIRI_Germ, ss.all3$Region, summary)
## $ALB
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00000 0.00000 0.00000 0.02459 0.00000 1.00000
##
## $LI
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.3425 0.0000 3.0000
##
## $MA
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00000 0.00000 0.00000 0.02597 0.00000 1.00000
##
## $ME
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $NH
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
tapply(ss.all3$PIRI_Germ, ss.all3$Treat_Type, summary)
## $Control
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $FallRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.000000 0.000000 0.000000 0.009804 0.000000 1.000000
##
## $Harvest
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00000 0.00000 0.00000 0.05357 0.00000 3.00000
##
## $MowRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00000 0.00000 0.00000 0.03676 0.00000 1.00000
##
## $SpringRx
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.2644 0.0000 3.0000
tapply(ss.all3$PIRI_Germ, ss.all3$Time_from_Treat, summary)
## $`1`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.7576 1.0000 3.0000
##
## $`2`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.0000 0.0000 0.0000 0.0303 0.0000 1.0000
##
## $`3`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $`4`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00000 0.00000 0.00000 0.01786 0.00000 1.00000
##
## $`5`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0.00000 0.00000 0.00000 0.08824 0.00000 3.00000
##
## $`6`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $`7`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $`8`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $`32`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
##
## $`33`
## Min. 1st Qu. Median Mean 3rd Qu. Max.
## 0 0 0 0 0 0
I did all this, but only ~20 germinant plots come through after merging in the veg and related data. That is too few to model and get anything useful from (though I spent a bunch of time doing this thinking it was a good idea).
ger.m1 <- glmmTMB(PIRI_Germ ~ Treat_Type + l.SO + l.other + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
# Won't converge
#remove other
ger.m2 <- glmmTMB(PIRI_Germ ~ Treat_Type + l.SO + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
AIC(ger.m2) #189.2
## [1] 189.1923
#test so
ger.m3 <- glmmTMB(PIRI_Germ ~ Treat_Type +l.BA_HA + l.BA_piri + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
AIC(ger.m3) #187.4
## [1] 187.3598
#test piri ba
ger.m4 <- glmmTMB(PIRI_Germ ~ Treat_Type + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
AIC(ger.m4) #186.2
## [1] 186.2114
#test veg
ger.m5 <- glmmTMB(PIRI_Germ ~ Treat_Type + l.BA_HA + avgLD_l + l.Mineral + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
AIC(ger.m5) #186.8
## [1] 186.7959
lrtest(ger.m2, ger.m3, ger.m4, ger.m5)
## Likelihood ratio test
##
## Model 1: PIRI_Germ ~ Treat_Type + l.SO + l.BA_HA + l.BA_piri + l.Mineral +
## avgLD_l + l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 2: PIRI_Germ ~ Treat_Type + l.BA_HA + l.BA_piri + l.Mineral + avgLD_l +
## l.Veg_Total + offset(l.TFT) + (1 | Site/Plot_No)
## Model 3: PIRI_Germ ~ Treat_Type + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## Model 4: PIRI_Germ ~ Treat_Type + l.BA_HA + avgLD_l + l.Mineral + offset(l.TFT) +
## (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 14 -80.596
## 2 13 -80.680 -1 0.1675 0.6823
## 3 12 -81.106 -1 0.8516 0.3561
## 4 11 -82.398 -1 2.5844 0.1079
#test mineral
ger.m6 <- glmmTMB(PIRI_Germ ~ Treat_Type + l.BA_HA + avgLD_l + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
AIC(ger.m6) #185.7
# BUG FIX: previously ran lrtest(ger.m5, ger.m4), which re-tested veg cover.
# The intended mineral test compares ger.m6 (without l.Mineral) against ger.m5 (with it).
lrtest(ger.m6, ger.m5)
## Likelihood ratio test
##
## Model 1: PIRI_Germ ~ Treat_Type + l.BA_HA + avgLD_l + l.Mineral + offset(l.TFT) +
## (1 | Site/Plot_No)
## Model 2: PIRI_Germ ~ Treat_Type + l.BA_HA + l.Mineral + avgLD_l + l.Veg_Total +
## offset(l.TFT) + (1 | Site/Plot_No)
## #Df LogLik Df Chisq Pr(>Chisq)
## 1 11 -82.398
## 2 12 -81.106 1 2.5844 0.1079
#won't converge without l.BA HA or avgLD (i.e. with only one as predictor)
ger.m7 <- glmmTMB(PIRI_Germ ~ Treat_Type + avgLD_l + l.BA_piri + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = nbinom1)
AIC(ger.m7) #185.6
## [1] 185.5667
# All VIFs are ~1.2 or below: no multicollinearity concern among the
# ger.m7 predictors.
check_collinearity(ger.m7)
## # Check for Multicollinearity
##
## Low Correlation
##
## Term VIF VIF 95% CI Increased SE Tolerance Tolerance 95% CI
## Treat_Type 1.07 [1.02, 1.28] 1.03 0.94 [0.78, 0.98]
## avgLD_l 1.19 [1.10, 1.36] 1.09 0.84 [0.74, 0.91]
## l.BA_piri 1.20 [1.11, 1.37] 1.10 0.83 [0.73, 0.90]
# Sensitivity check: refit ger.m7 with a Poisson family (no dispersion
# parameter).
ger.m7a <- glmmTMB(PIRI_Germ ~ Treat_Type + avgLD_l + l.BA_piri + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
family = poisson)
# BUG FIX: the original called AIC(ger.m7) here, so the 185.5667 printed below
# is ger.m7's AIC, not the Poisson refit's -- re-knit to get ger.m7a's value.
AIC(ger.m7a)
## [1] 185.5667
# Sensitivity check: zero-inflated nbinom1 with region-specific zero inflation.
ger.m7b <- glmmTMB(PIRI_Germ ~ Treat_Type + avgLD_l + l.BA_piri + offset(l.TFT) + (1|Site/Plot_No),
data = ss.all3,
ziformula = ~Region,
family = nbinom1)
# BUG FIX: the original called AIC(ger.m7) again, so neither alternative fit
# was actually compared; re-knit to get ger.m7b's AIC.
AIC(ger.m7b) #won't work with nbinom2 (zi or not) or poisson
## [1] 185.5667
# DHARMa simulation-based residual diagnostics for ger.m6 (1000 simulations).
ger.m6_sr <- simulateResiduals(ger.m6, n = 1000, plot = TRUE) #passes
# Uniformity (KS), dispersion, and outlier tests. testResiduals() both prints
# and returns its results, which is why the output appears twice below.
testResiduals(ger.m6_sr) #passes
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.050094, p-value = 0.1642
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.0000066907, p-value = 0.234
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.008032129
## sample estimates:
## outlier frequency (expected: 0.000742971887550201 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.050094, p-value = 0.1642
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.0000066907, p-value = 0.234
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.000000000 0.008032129
## sample estimates:
## outlier frequency (expected: 0.000742971887550201 )
## 0
# Observed zeros vs. zeros expected under the fitted model.
testZeroInflation(ger.m6_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 1.0193, p-value = 0.51
## alternative hypothesis: two.sided
# DHARMa simulation-based residual diagnostics for ger.m7 (1000 simulations).
# Comment fixed: original said "better fit than 7", a self-comparison typo --
# the comparison is against ger.m6.
ger.m7_sr <- simulateResiduals(ger.m7, n = 1000, plot = TRUE) #passes, better fit than ger.m6
# testResiduals() both prints and returns its results, hence the duplicated
# output below.
testResiduals(ger.m7_sr) #passes
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.042718, p-value = 0.3235
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.0001079, p-value = 0.256
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.00000000 0.01711847
## sample estimates:
## outlier frequency (expected: 0.000903614457831325 )
## 0
## $uniformity
##
## Asymptotic one-sample Kolmogorov-Smirnov test
##
## data: simulationOutput$scaledResiduals
## D = 0.042718, p-value = 0.3235
## alternative hypothesis: two-sided
##
##
## $dispersion
##
## DHARMa nonparametric dispersion test via sd of residuals fitted vs.
## simulated
##
## data: simulationOutput
## dispersion = 0.0001079, p-value = 0.256
## alternative hypothesis: two.sided
##
##
## $outliers
##
## DHARMa bootstrapped outlier test
##
## data: simulationOutput
## outliers at both margin(s) = 0, observations = 498, p-value = 1
## alternative hypothesis: two.sided
## percent confidence interval:
## 0.00000000 0.01711847
## sample estimates:
## outlier frequency (expected: 0.000903614457831325 )
## 0
# Observed zeros vs. zeros expected under the fitted model.
testZeroInflation(ger.m7_sr) #passes
##
## DHARMa zero-inflation test via comparison to expected zeros with
## simulation under H0 = fitted model
##
## data: simulationOutput
## ratioObsSim = 1.0203, p-value = 0.494
## alternative hypothesis: two.sided
# Pairwise Tukey-adjusted comparisons of treatment types, back-transformed
# to the response (count) scale.
# NOTE(review): the Control estimate is essentially 0 with an enormous
# relative SE and degenerate CIs (asymp.UCL = Inf), which looks like
# quasi-complete separation for Control (few/no germinants observed there --
# confirm against the raw data); its contrasts (z = -0.002, p = 1) are not
# informative.
emmeans(ger.m7, specs = pairwise ~ Treat_Type, adjust = 'Tukey', type = 'response') #none different from each other
## $emmeans
## Treat_Type response SE df asymp.LCL asymp.UCL
## Control 0.00000 0.00000003 Inf 0.0000000 Inf
## FallRx 0.00132 0.00245857 Inf 0.0000349 0
## Harvest 0.00407 0.00819070 Inf 0.0000787 0
## MowRx 0.00499 0.00782616 Inf 0.0002314 0
## SpringRx 0.07132 0.10866119 Inf 0.0036013 1
##
## Confidence level used: 0.95
## Intervals are back-transformed from the log scale
##
## $contrasts
## contrast ratio SE df null z.ratio p.value
## Control / FallRx 0.0000 0.0000203 Inf 1 -0.002 1.0000
## Control / Harvest 0.0000 0.0000066 Inf 1 -0.002 1.0000
## Control / MowRx 0.0000 0.0000054 Inf 1 -0.002 1.0000
## Control / SpringRx 0.0000 0.0000004 Inf 1 -0.002 1.0000
## FallRx / Harvest 0.3256 0.7638313 Inf 1 -0.478 0.9893
## FallRx / MowRx 0.2653 0.5086896 Inf 1 -0.692 0.9583
## FallRx / SpringRx 0.0186 0.0363034 Inf 1 -2.039 0.2472
## Harvest / MowRx 0.8147 1.7094781 Inf 1 -0.098 1.0000
## Harvest / SpringRx 0.0570 0.1223393 Inf 1 -1.335 0.6691
## MowRx / SpringRx 0.0700 0.1151496 Inf 1 -1.617 0.4865
##
## P value adjustment: tukey method for comparing a family of 5 estimates
## Tests are performed on the log scale
This analysis can be seen in the pair plots at the top of script
Veg cover, average litter depth, and time from treatment all have a clear negative relationship (as each decreases, the total increases), some more significant than others.
Basal area per hectare and mineral soil exposure have a positive relationship (as each increases, the total increases); these relationships appear weaker than the negative relationships above.
More scatterplots hidden below; see pairwise at beginning of script
Hidden below is the original analysis and step by step model elimination conducted without treatment type included. Best model without treatment type had a higher AIC and is underdispersed according to DHARMa tests